/*** STACK LOCATION ***/
ENTRY(stack_start)
- .long SYMBOL_NAME(cpu0_stack) + 8100 - __PAGE_OFFSET
+ .long SYMBOL_NAME(cpu0_stack) + STACK_SIZE - 200 - __PAGE_OFFSET
.long __HYPERVISOR_DS
/*** DESCRIPTOR TABLES ***/
.quad SYMBOL_NAME(idt_table)
ENTRY(stack_start)
- .quad SYMBOL_NAME(cpu0_stack) + 8000
+ .quad SYMBOL_NAME(cpu0_stack) + STACK_SIZE - 200
high_start:
.quad __high_start
#ifdef MEMORY_GUARD
/* Unmap the first page of CPU0's stack. */
extern unsigned long cpu0_stack[];
- memguard_guard_range(cpu0_stack, PAGE_SIZE);
+ memguard_guard_stack(cpu0_stack);
#endif
open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);
/* So we see what's up. */
printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
- stack = (void *)alloc_xenheap_pages(1);
+ stack = (void *)alloc_xenheap_pages(STACK_ORDER);
#if defined(__i386__)
stack_start.esp = __pa(stack) + STACK_SIZE - STACK_RESERVED;
#elif defined(__x86_64__)
#endif
/* Debug build: detect stack overflow by setting up a guard page. */
- memguard_guard_range(stack, PAGE_SIZE);
+ memguard_guard_stack(stack);
/*
* This grunge runs the startup process for
}
}
+/*
+ * Guard (unmap) the first page of the given CPU stack so that a stack
+ * overflow faults instead of silently corrupting adjacent memory.
+ * p: base (lowest address) of the stack allocation.
+ */
+void memguard_guard_stack(void *p)
+{
+ memguard_guard_range(p, PAGE_SIZE);
+}
+
void memguard_guard_range(void *p, unsigned long l)
{
__memguard_change_range(p, l, 1);
/* Disable the NMI watchdog. It's useless now. */
watchdog_on = 0;
+ console_force_unlock();
+
/* Find information saved during fault and dump it to the console. */
tss = &init_tss[cpu];
printk("CPU: %d\nEIP: %04x:[<%08x>] \nEFLAGS: %08x\n",
jmp error_code
ENTRY(nmi)
- iret
+ iretq
.data
#ifdef MEMORY_GUARD
-#if 1
-
-void *memguard_init(void *heap_start) { return heap_start; }
-void memguard_guard_range(void *p, unsigned long l) {}
-void memguard_unguard_range(void *p, unsigned long l) {}
-
-#else
-
+/*
+ * Take one page off the 'heap_start' allocation cursor, zero it, and
+ * assign it as a page table of the given level.  _level must name a
+ * pointer variable whose type is <_level>_pgentry_t * (e.g. l1, l2, l3).
+ * Side effect: advances heap_start by PAGE_SIZE.
+ */
+#define ALLOC_PT(_level) \
+do { \
+ (_level) = (_level ## _pgentry_t *)heap_start; \
+ heap_start = (void *)((unsigned long)heap_start + PAGE_SIZE); \
+ clear_page(_level); \
+} while ( 0 )
+/*
+ * Build 4kB (L1) mappings for the whole xenheap under a full 4-level
+ * page-table tree rooted at idle_pg_table, allocating the intermediate
+ * tables from heap_start.  Small pages are required because guarding
+ * individual pages is incompatible with superpage mappings (see below).
+ * Returns the advanced heap_start past the pages consumed for tables.
+ */
void *memguard_init(void *heap_start)
{
- l1_pgentry_t *l1;
- int i, j;
+ l1_pgentry_t *l1 = NULL;
+ l2_pgentry_t *l2 = NULL;
+ l3_pgentry_t *l3 = NULL;
+ l4_pgentry_t *l4 = &idle_pg_table[l4_table_offset(PAGE_OFFSET)];
+ unsigned long i, j;
/* Round the allocation pointer up to a page boundary. */
heap_start = (void *)(((unsigned long)heap_start + (PAGE_SIZE-1)) &
/* Memory guarding is incompatible with super pages. */
for ( i = 0; i < (xenheap_phys_end >> L2_PAGETABLE_SHIFT); i++ )
{
- l1 = (l1_pgentry_t *)heap_start;
- heap_start = (void *)((unsigned long)heap_start + PAGE_SIZE);
+ ALLOC_PT(l1);
for ( j = 0; j < ENTRIES_PER_L1_PAGETABLE; j++ )
l1[j] = mk_l1_pgentry((i << L2_PAGETABLE_SHIFT) |
(j << L1_PAGETABLE_SHIFT) |
__PAGE_HYPERVISOR);
- idle_pg_table[i] = idle_pg_table[i + l2_table_offset(PAGE_OFFSET)] =
- mk_l2_pgentry(virt_to_phys(l1) | __PAGE_HYPERVISOR);
+ /*
+ * The l2/l3 cursors were advanced entry-by-entry, so they are
+ * page-aligned exactly when the previous table is full (or on the
+ * very first iteration, starting from NULL): allocate the next
+ * table and hook it into the level above.
+ */
+ if ( !((unsigned long)l2 & (PAGE_SIZE-1)) )
+ {
+ ALLOC_PT(l2);
+ if ( !((unsigned long)l3 & (PAGE_SIZE-1)) )
+ {
+ ALLOC_PT(l3);
+ *l4++ = mk_l4_pgentry(virt_to_phys(l3) | __PAGE_HYPERVISOR);
+ }
+ *l3++ = mk_l3_pgentry(virt_to_phys(l2) | __PAGE_HYPERVISOR);
+ }
+ *l2++ = mk_l2_pgentry(virt_to_phys(l1) | __PAGE_HYPERVISOR);
}
return heap_start;
{
l1_pgentry_t *l1;
l2_pgentry_t *l2;
+ l3_pgentry_t *l3;
+ l4_pgentry_t *l4;
unsigned long _p = (unsigned long)p;
unsigned long _l = (unsigned long)l;
while ( _l != 0 )
{
- l2 = &idle_pg_table[l2_table_offset(_p)];
- l1 = l2_pgentry_to_l1(*l2) + l1_table_offset(_p);
+ /*
+ * Walk all four paging levels from idle_pg_table down to the L1
+ * entry covering _p (the tree was fully populated by
+ * memguard_init(), so every intermediate entry is present --
+ * NOTE(review): confirm no superpage entries can appear here).
+ */
+ l4 = &idle_pg_table[l4_table_offset(_p)];
+ l3 = l4_pgentry_to_l3(*l4) + l3_table_offset(_p);
+ l2 = l3_pgentry_to_l2(*l3) + l2_table_offset(_p);
+ l1 = l2_pgentry_to_l1(*l2) + l1_table_offset(_p);
if ( guard )
*l1 = mk_l1_pgentry(l1_pgentry_val(*l1) & ~_PAGE_PRESENT);
else
}
}
+/*
+ * Guard (unmap) two pages starting one page above the stack base, so a
+ * deep overflow faults.  The lowest page itself stays mapped --
+ * NOTE(review): presumably it is reserved for other per-CPU data below
+ * the guard hole; confirm against the stack layout for this arch.
+ * p: base (lowest address) of the stack allocation.
+ */
+void memguard_guard_stack(void *p)
+{
+ p = (void *)((unsigned long)p + PAGE_SIZE);
+ memguard_guard_range(p, 2 * PAGE_SIZE);
+}
+
void memguard_guard_range(void *p, unsigned long l)
{
__memguard_change_range(p, l, 1);
}
#endif
-
-#endif
/* Disable the NMI watchdog. It's useless now. */
watchdog_on = 0;
+ console_force_unlock();
+
/* Find information saved during fault and dump it to the console. */
printk("************************************\n");
printk("EIP: %04lx:[<%p>] \nEFLAGS: %p\n",
#ifndef NDEBUG
#define MEMORY_GUARD
+/*
+ * x86_64 debug builds use an order-2 (four page) per-CPU stack --
+ * NOTE(review): presumably the extra pages accommodate the memguard
+ * guard hole punched by memguard_guard_stack(); confirm.
+ */
+#ifdef __x86_64__
+#define STACK_ORDER 2
#endif
+#endif
+
+/* Default stack is order-1, i.e. two pages. */
+#ifndef STACK_ORDER
+#define STACK_ORDER 1
+#endif
+/* Per-CPU stack size in bytes: 2^STACK_ORDER pages. */
+#define STACK_SIZE (PAGE_SIZE << STACK_ORDER)
#ifndef __ASSEMBLY__
extern unsigned long _end; /* standard ELF symbol */
#ifdef MEMORY_GUARD
void *memguard_init(void *heap_start);
+void memguard_guard_stack(void *p);
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
#else
#define memguard_init(_s) (_s)
+#define memguard_guard_stack(_p) ((void)0)
#define memguard_guard_range(_p,_l) ((void)0)
#define memguard_unguard_range(_p,_l) ((void)0)
#endif
#endif
#define PAGE_SHIFT L1_PAGETABLE_SHIFT
+/*
+ * The assembler does not accept C integer-literal suffixes such as UL,
+ * so assembly code gets a plain-integer form of PAGE_SIZE.
+ */
+#ifndef __ASSEMBLY__
#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#else
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#endif
#define PAGE_MASK (~(PAGE_SIZE-1))
#define clear_page(_p) memset((void *)(_p), 0, PAGE_SIZE)
#ifndef __SCHED_H__
#define __SCHED_H__
-#define STACK_SIZE (2*PAGE_SIZE)
-
#include <xen/config.h>
#include <xen/types.h>
#include <xen/spinlock.h>